Get rid of more uses of the batched MMU interface.
Signed-off-by: Keir Fraser <keir@xensource.com>
set_call_gate(&default_ldt[0],lcall7);
set_call_gate(&default_ldt[4],lcall27);
__make_page_readonly(&default_ldt[0]);
- flush_page_update_queue();
cpu_init();
}
extern struct desc_struct default_ldt[];
-static inline void clear_LDT(void)
-{
- /*
- * NB. We load the default_ldt for lcall7/27 handling on demand, as
- * it slows down context switching. Noone uses it anyway.
- */
- queue_set_ldt(0, 0);
-}
-
static inline void load_LDT(mm_context_t *pc)
{
void *segments = pc->ldt;
if ( count == 0 )
segments = NULL;
- queue_set_ldt((unsigned long)segments, count);
+ xen_set_ldt((unsigned long)segments, count);
}
#endif /* __ASSEMBLY__ */
init_mm.pgd + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
__make_page_readonly(pgd);
- queue_pgd_pin(__pa(pgd));
- flush_page_update_queue();
+ xen_pgd_pin(__pa(pgd));
}
return pgd;
}
free_page((unsigned long)__va(pgd_val(pgd[i])-1));
kmem_cache_free(pae_pgd_cachep, pgd);
#else
- queue_pgd_unpin(__pa(pgd));
+ xen_pgd_unpin(__pa(pgd));
__make_page_writable(pgd);
- flush_page_update_queue();
free_page((unsigned long)pgd);
#endif
}
{
clear_page(pte);
__make_page_readonly(pte);
- queue_pte_pin(__pa(pte));
- flush_page_update_queue();
+ xen_pte_pin(__pa(pte));
}
return pte;
static __inline__ void pte_free_slow(pte_t *pte)
{
- queue_pte_unpin(__pa(pte));
+ xen_pte_unpin(__pa(pte));
__make_page_writable(pte);
- flush_page_update_queue();
free_page((unsigned long)pte);
}
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ set_pte(pte, pte_wrprotect(*pte));
}
static inline void __make_page_writable(void *va)
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ set_pte(pte, pte_mkwrite(*pte));
}
static inline void make_page_readonly(void *va)
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ set_pte(pte, pte_wrprotect(*pte));
if ( (unsigned long)va >= VMALLOC_START )
__make_page_readonly(machine_to_virt(
*(unsigned long *)pte&PAGE_MASK));
pgd_t *pgd = pgd_offset_k((unsigned long)va);
pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
pte_t *pte = pte_offset(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ set_pte(pte, pte_mkwrite(*pte));
if ( (unsigned long)va >= VMALLOC_START )
__make_page_writable(machine_to_virt(
*(unsigned long *)pte&PAGE_MASK));
*/
memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
make_page_readonly(new_pgd);
- queue_pgd_pin(__pa(new_pgd));
+ xen_pgd_pin(__pa(new_pgd));
load_cr3(new_pgd);
- queue_pgd_unpin(__pa(old_pgd));
+ flush_page_update_queue();
+ xen_pgd_unpin(__pa(old_pgd));
make_page_writable(old_pgd);
- __flush_tlb_all(); /* implicit flush */
+ __flush_tlb_all();
free_bootmem(__pa(old_pgd), PAGE_SIZE);
kernel_physical_mapping_init(new_pgd);
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
- if (pte) {
+ if (pte)
make_page_readonly(pte);
- flush_page_update_queue();
- }
return pte;
}
clear_page(pte);
make_page_readonly(pte);
- queue_pte_pin(__pa(pte));
- flush_page_update_queue();
+ xen_pte_pin(__pa(pte));
}
void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
struct page *page = virt_to_page(pte);
ClearPageForeign(page);
- queue_pte_unpin(__pa(pte));
+ xen_pte_unpin(__pa(pte));
make_page_writable(pte);
- flush_page_update_queue();
}
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
out:
make_page_readonly(pgd);
- queue_pgd_pin(__pa(pgd));
- flush_page_update_queue();
+ xen_pgd_pin(__pa(pgd));
}
/* never called when PTRS_PER_PMD > 1 */
{
unsigned long flags; /* can be called from interrupt context */
- queue_pgd_unpin(__pa(pgd));
+ xen_pgd_unpin(__pa(pgd));
make_page_writable(pgd);
- flush_page_update_queue();
if (PTRS_PER_PMD > 1)
return;
pud_t *pud = pud_offset(pgd, (unsigned long)va);
pmd_t *pmd = pmd_offset(pud, (unsigned long)va);
pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ set_pte(pte, pte_wrprotect(*pte));
}
void make_lowmem_page_writable(void *va)
pud_t *pud = pud_offset(pgd, (unsigned long)va);
pmd_t *pmd = pmd_offset(pud, (unsigned long)va);
pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ set_pte(pte, pte_mkwrite(*pte));
}
void make_page_readonly(void *va)
pud_t *pud = pud_offset(pgd, (unsigned long)va);
pmd_t *pmd = pmd_offset(pud, (unsigned long)va);
pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
+ set_pte(pte, pte_wrprotect(*pte));
if ( (unsigned long)va >= (unsigned long)high_memory )
{
unsigned long phys;
pud_t *pud = pud_offset(pgd, (unsigned long)va);
pmd_t *pmd = pmd_offset(pud, (unsigned long)va);
pte_t *pte = pte_offset_kernel(pmd, (unsigned long)va);
- queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
+ set_pte(pte, pte_mkwrite(*pte));
if ( (unsigned long)va >= (unsigned long)high_memory )
{
unsigned long phys;
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
-#define set_pte_batched(pteptr, pteval) \
- queue_l1_entry_update(pteptr, (pteval).pte_low)
#define ptep_get_and_clear(xp) __pte_ma(xchg(&(xp)->pte_low, 0))
#define pte_same(a, b) ((a).pte_low == (b).pte_low)